author      Liam <byteslice@airmail.cc>    2022-11-09 04:26:14 +0100
committer   Liam <byteslice@airmail.cc>    2022-11-09 14:09:50 +0100
commit      71c0e20f95861a3766c04bc92afe677205848407 (patch)
tree        ba422f92b33046bb7c0f5304f1a882a84a2d8f2a
parent      Merge pull request #9195 from vonchenplus/vmm_kinds_error (diff)
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp       9
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h    3
2 files changed, 9 insertions, 3 deletions
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index b1cabbca0..d6676904b 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -384,7 +384,8 @@ void KScheduler::SwitchThread(KThread* next_thread) {
void KScheduler::ScheduleImpl() {
// First, clear the needs scheduling bool.
- m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+ m_state.needs_scheduling.store(false, std::memory_order_relaxed);
+ std::atomic_thread_fence(std::memory_order_seq_cst);
// Load the appropriate thread pointers for scheduling.
KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
@@ -400,7 +401,8 @@ void KScheduler::ScheduleImpl() {
// If there aren't, we want to check if the highest priority thread is the same as the current
// thread.
if (highest_priority_thread == cur_thread) {
- // If they're the same, then we can just return.
+ // If they're the same, then we can just issue a memory barrier and return.
+ std::atomic_thread_fence(std::memory_order_seq_cst);
return;
}
@@ -476,7 +478,8 @@ void KScheduler::ScheduleImplFiber() {
// We failed to successfully do the context switch, and need to retry.
// Clear needs_scheduling.
- m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+ m_state.needs_scheduling.store(false, std::memory_order_relaxed);
+ std::atomic_thread_fence(std::memory_order_seq_cst);
// Refresh the highest priority thread.
highest_priority_thread = m_state.highest_priority_thread;
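
Aside: below is a minimal standalone sketch (not yuzu code) of the store-plus-fence pattern used in the ScheduleImpl/ScheduleImplFiber hunks above, assuming the intent is to keep the clearing of needs_scheduling ordered before the subsequent reads of the scheduling state; a lone seq_cst store does not by itself prevent later loads of other variables from being reordered ahead of it, while a standalone seq_cst fence between the store and the loads does. The struct and variable names are illustrative stand-ins, not the real KScheduler types.

#include <atomic>
#include <cstdio>

// Illustrative stand-ins for the scheduler state touched in the diff.
struct SchedulerState {
    std::atomic<bool> needs_scheduling{true};
    std::atomic<int> highest_priority_thread{42};
};

int main() {
    SchedulerState m_state;

    // Before: m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
    // After: a relaxed store followed by a full fence, so the cleared flag is
    // ordered before the loads that follow it.
    m_state.needs_scheduling.store(false, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst);

    // Corresponds to loading the thread pointers / highest priority thread
    // after the flag has been cleared.
    const int highest =
        m_state.highest_priority_thread.load(std::memory_order_relaxed);
    std::printf("needs_scheduling=%d highest_priority_thread=%d\n",
                static_cast<int>(
                    m_state.needs_scheduling.load(std::memory_order_relaxed)),
                highest);
    return 0;
}
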
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 73314b45e..129d60472 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -60,6 +60,9 @@ public:
// Release an instance of the lock.
if ((--lock_count) == 0) {
+ // Perform a memory barrier here.
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+
// We're no longer going to hold the lock. Take note of what cores need scheduling.
const u64 cores_needing_scheduling =
SchedulerType::UpdateHighestPriorityThreads(kernel);
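
Aside: a simplified, self-contained sketch of the Unlock() path changed in k_scheduler_lock.h, under the assumption that the added fence is meant to make writes performed while the lock was held visible before the cores needing scheduling are computed. The kernel reference, SchedulerType::UpdateHighestPriorityThreads, and the spinlock/owner bookkeeping are omitted; the names below are placeholders, not the real API.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Placeholder scheduler-lock sketch; only the release path from the hunk
// above is modeled.
class SketchSchedulerLock {
public:
    void Lock() {
        // The real lock also acquires a spinlock and records the owning
        // thread; this sketch only tracks the recursion count.
        ++lock_count;
    }

    void Unlock() {
        // Release an instance of the lock.
        if ((--lock_count) == 0) {
            // Perform a memory barrier here, as in the diff, so earlier
            // writes are visible before the scheduling decision is made.
            std::atomic_thread_fence(std::memory_order_seq_cst);

            // Stand-in for SchedulerType::UpdateHighestPriorityThreads():
            // take note of what cores need scheduling.
            const std::uint64_t cores_needing_scheduling =
                cores_pending.load(std::memory_order_relaxed);
            std::printf("cores needing scheduling: 0x%llx\n",
                        static_cast<unsigned long long>(
                            cores_needing_scheduling));
        }
    }

private:
    std::int32_t lock_count{0};
    std::atomic<std::uint64_t> cores_pending{0b1010};
};

int main() {
    SketchSchedulerLock lock;
    lock.Lock();
    lock.Unlock();
    return 0;
}
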